
Example 76 with TestingServer

use of org.apache.curator.test.TestingServer in project druid by druid-io.

the class TestKafkaExtractionCluster method setUp.

@Before
public void setUp() throws Exception {
    zkTestServer = new TestingServer(-1, temporaryFolder.newFolder(), true);
    zkTestServer.start();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            zkTestServer.stop();
        }
    });
    zkClient = new ZkClient(zkTestServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            zkClient.close();
        }
    });
    if (!zkClient.exists("/kafka")) {
        zkClient.create("/kafka", null, CreateMode.PERSISTENT);
    }
    log.info("---------------------------Started ZK---------------------------");
    final String zkKafkaPath = "/kafka";
    final Properties serverProperties = new Properties();
    serverProperties.putAll(kafkaProperties);
    serverProperties.put("broker.id", "0");
    serverProperties.put("log.dir", temporaryFolder.newFolder().getAbsolutePath());
    serverProperties.put("log.cleaner.enable", "true");
    serverProperties.put("host.name", "127.0.0.1");
    serverProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
    serverProperties.put("zookeeper.session.timeout.ms", "10000");
    serverProperties.put("zookeeper.sync.time.ms", "200");
    kafkaConfig = new KafkaConfig(serverProperties);
    final long time = DateTime.parse("2015-01-01").getMillis();
    kafkaServer = new KafkaServer(kafkaConfig, new Time() {

        @Override
        public long milliseconds() {
            return time;
        }

        @Override
        public long nanoseconds() {
            return milliseconds() * 1_000_000;
        }

        @Override
        public void sleep(long ms) {
            try {
                Thread.sleep(ms);
            } catch (InterruptedException e) {
                throw Throwables.propagate(e);
            }
        }
    });
    kafkaServer.startup();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            kafkaServer.shutdown();
            kafkaServer.awaitShutdown();
        }
    });
    int sleepCount = 0;
    while (!kafkaServer.kafkaController().isActive()) {
        Thread.sleep(100);
        if (++sleepCount > 10) {
            throw new InterruptedException("Controller took too long to awaken");
        }
    }
    log.info("---------------------------Started Kafka Server---------------------------");
    final ZkClient zkClient = new ZkClient(zkTestServer.getConnectString() + zkKafkaPath, 10000, 10000, ZKStringSerializer$.MODULE$);
    try (final AutoCloseable autoCloseable = new AutoCloseable() {

        @Override
        public void close() throws Exception {
            if (zkClient.exists(zkKafkaPath)) {
                try {
                    zkClient.deleteRecursive(zkKafkaPath);
                } catch (org.I0Itec.zkclient.exception.ZkException ex) {
                    log.warn(ex, "error deleting %s zk node", zkKafkaPath);
                }
            }
            zkClient.close();
        }
    }) {
        final Properties topicProperties = new Properties();
        topicProperties.put("cleanup.policy", "compact");
        if (!AdminUtils.topicExists(zkClient, topicName)) {
            AdminUtils.createTopic(zkClient, topicName, 1, 1, topicProperties);
        }
        log.info("---------------------------Created topic---------------------------");
        Assert.assertTrue(AdminUtils.topicExists(zkClient, topicName));
    }
    final Properties kafkaProducerProperties = makeProducerProperties();
    final Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(kafkaProducerProperties));
    try (final AutoCloseable autoCloseable = new AutoCloseable() {

        @Override
        public void close() throws Exception {
            producer.close();
        }
    }) {
        producer.send(new KeyedMessage<>(topicName, StringUtils.toUtf8("abcdefg"), StringUtils.toUtf8("abcdefg")));
    }
    System.setProperty("druid.extensions.searchCurrentClassloader", "false");
    injector = Initialization.makeInjectorWithModules(GuiceInjectors.makeStartupInjector(), ImmutableList.of(new Module() {

        @Override
        public void configure(Binder binder) {
            binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test");
            binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0);
        }
    }, // These injections fail under IntelliJ but are required for maven
    new NamespaceExtractionModule(), new KafkaExtractionNamespaceModule()));
    mapper = injector.getInstance(ObjectMapper.class);
    log.info("--------------------------- placed default item via producer ---------------------------");
    final Map<String, String> consumerProperties = new HashMap<>(kafkaProperties);
    consumerProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
    consumerProperties.put("zookeeper.session.timeout.ms", "10000");
    consumerProperties.put("zookeeper.sync.time.ms", "200");
    final KafkaLookupExtractorFactory kafkaLookupExtractorFactory = new KafkaLookupExtractorFactory(null, topicName, consumerProperties);
    factory = (KafkaLookupExtractorFactory) mapper.readValue(mapper.writeValueAsString(kafkaLookupExtractorFactory), LookupExtractorFactory.class);
    Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaTopic(), factory.getKafkaTopic());
    Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaProperties(), factory.getKafkaProperties());
    factory.start();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            factory.close();
        }
    });
    log.info("--------------------------- started rename manager ---------------------------");
}
Also used : NamespaceExtractionModule(io.druid.server.lookup.namespace.NamespaceExtractionModule) HashMap(java.util.HashMap) Closeable(java.io.Closeable) DateTime(org.joda.time.DateTime) Time(kafka.utils.Time) Properties(java.util.Properties) Binder(com.google.inject.Binder) ProducerConfig(kafka.producer.ProducerConfig) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) TestingServer(org.apache.curator.test.TestingServer) ZkClient(org.I0Itec.zkclient.ZkClient) IOException(java.io.IOException) KafkaServer(kafka.server.KafkaServer) Producer(kafka.javaapi.producer.Producer) Module(com.google.inject.Module) NamespaceExtractionModule(io.druid.server.lookup.namespace.NamespaceExtractionModule) KafkaConfig(kafka.server.KafkaConfig) Before(org.junit.Before)
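
The matching tearDown is not shown in this example. Assuming closer is a Guava Closer (consistent with the Closeable registrations above), a minimal hypothetical counterpart just closes it, which unwinds the registered resources in reverse order: the lookup factory first, then the Kafka server, the ZkClient, and finally the TestingServer.

import com.google.common.io.Closer;
import java.io.IOException;
import org.junit.After;

// Hypothetical companion to the setUp above. Guava's Closer closes registered
// Closeables in reverse registration order, so the embedded ZooKeeper
// (TestingServer) is stopped last.
private final Closer closer = Closer.create();

@After
public void tearDown() throws IOException {
    closer.close();
}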

Example 77 with TestingServer

use of org.apache.curator.test.TestingServer in project druid by druid-io.

the class OverlordTest method setupServerAndCurator.

private void setupServerAndCurator() throws Exception {
    server = new TestingServer();
    timing = new Timing();
    curator = CuratorFrameworkFactory.builder().connectString(server.getConnectString()).sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection()).retryPolicy(new RetryOneTime(1)).compressionProvider(new PotentiallyGzippedCompressionProvider(true)).build();
}
Also used : TestingServer(org.apache.curator.test.TestingServer) RetryOneTime(org.apache.curator.retry.RetryOneTime) Timing(org.apache.curator.test.Timing) PotentiallyGzippedCompressionProvider(io.druid.curator.PotentiallyGzippedCompressionProvider)
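
setupServerAndCurator only builds the client; it is not started here. A minimal sketch (not taken from OverlordTest) of how such a TestingServer and CuratorFramework pair is typically exercised and torn down, using standard Curator calls:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.TestingServer;

public class CuratorTestingServerSketch {
    public static void main(String[] args) throws Exception {
        // TestingServer implements Closeable, so try-with-resources shuts the
        // in-process ZooKeeper down even if something below fails.
        try (TestingServer server = new TestingServer()) {
            CuratorFramework curator = CuratorFrameworkFactory.builder()
                    .connectString(server.getConnectString())
                    .retryPolicy(new RetryOneTime(1))
                    .build();
            curator.start();
            // wait for the session to be established before first use
            curator.blockUntilConnected();
            curator.create().forPath("/smoke-test", new byte[0]);
            curator.close();
        }
    }
}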

Example 78 with TestingServer

use of org.apache.curator.test.TestingServer in project flink by apache.

the class WebRuntimeMonitorITCase method testRedirectToLeader.

/**
	 * Tests that the web monitor associated with the follower JobManager redirects to the leader.
	 */
@Test
public void testRedirectToLeader() throws Exception {
    final Deadline deadline = TestTimeout.fromNow();
    ActorSystem[] jobManagerSystem = new ActorSystem[2];
    WebRuntimeMonitor[] webMonitor = new WebRuntimeMonitor[2];
    List<LeaderRetrievalService> leaderRetrievalServices = new ArrayList<>();
    try (TestingServer zooKeeper = new TestingServer()) {
        final Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(zooKeeper.getConnectString(), temporaryFolder.getRoot().getPath());
        File logDir = temporaryFolder.newFolder();
        Path logFile = Files.createFile(new File(logDir, "jobmanager.log").toPath());
        Files.createFile(new File(logDir, "jobmanager.out").toPath());
        config.setInteger(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, 0);
        config.setString(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY, logFile.toString());
        for (int i = 0; i < jobManagerSystem.length; i++) {
            jobManagerSystem[i] = AkkaUtils.createActorSystem(new Configuration(), new Some<>(new Tuple2<String, Object>("localhost", 0)));
        }
        for (int i = 0; i < webMonitor.length; i++) {
            LeaderRetrievalService lrs = ZooKeeperUtils.createLeaderRetrievalService(config);
            leaderRetrievalServices.add(lrs);
            webMonitor[i] = new WebRuntimeMonitor(config, lrs, jobManagerSystem[i]);
        }
        ActorRef[] jobManager = new ActorRef[2];
        String[] jobManagerAddress = new String[2];
        for (int i = 0; i < jobManager.length; i++) {
            Configuration jmConfig = config.clone();
            jmConfig.setInteger(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, webMonitor[i].getServerPort());
            jobManager[i] = JobManager.startJobManagerActors(jmConfig, jobManagerSystem[i], TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), JobManager.class, MemoryArchivist.class)._1();
            jobManagerAddress[i] = AkkaUtils.getAkkaURL(jobManagerSystem[i], jobManager[i]);
            webMonitor[i].start(jobManagerAddress[i]);
        }
        LeaderRetrievalService lrs = ZooKeeperUtils.createLeaderRetrievalService(config);
        leaderRetrievalServices.add(lrs);
        TestingListener leaderListener = new TestingListener();
        lrs.start(leaderListener);
        leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
        String leaderAddress = leaderListener.getAddress();
        int leaderIndex = leaderAddress.equals(jobManagerAddress[0]) ? 0 : 1;
        int followerIndex = (leaderIndex + 1) % 2;
        ActorSystem leadingSystem = jobManagerSystem[leaderIndex];
        ActorSystem followerSystem = jobManagerSystem[followerIndex];
        WebMonitor leadingWebMonitor = webMonitor[leaderIndex];
        WebMonitor followerWebMonitor = webMonitor[followerIndex];
        // For test stability reason we have to wait until we are sure that both leader
        // listeners have been notified.
        JobManagerRetriever leadingRetriever = Whitebox.getInternalState(leadingWebMonitor, "retriever");
        JobManagerRetriever followerRetriever = Whitebox.getInternalState(followerWebMonitor, "retriever");
        // Wait for the initial notifications
        waitForLeaderNotification(leadingSystem, jobManager[leaderIndex], leadingRetriever, deadline);
        waitForLeaderNotification(leadingSystem, jobManager[leaderIndex], followerRetriever, deadline);
        try (HttpTestClient leaderClient = new HttpTestClient("localhost", leadingWebMonitor.getServerPort());
            HttpTestClient followingClient = new HttpTestClient("localhost", followerWebMonitor.getServerPort())) {
            String expected = new Scanner(new File(MAIN_RESOURCES_PATH + "/index.html")).useDelimiter("\\A").next();
            // Request the file from the leading web server
            leaderClient.sendGetRequest("index.html", deadline.timeLeft());
            HttpTestClient.SimpleHttpResponse response = leaderClient.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("html"));
            assertEquals(expected, response.getContent());
            // Request the file from the following web server
            followingClient.sendGetRequest("index.html", deadline.timeLeft());
            response = followingClient.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.TEMPORARY_REDIRECT, response.getStatus());
            assertTrue(response.getLocation().contains(String.valueOf(leadingWebMonitor.getServerPort())));
            // Kill the leader
            leadingSystem.shutdown();
            // Wait for the notification of the follower
            waitForLeaderNotification(followerSystem, jobManager[followerIndex], followerRetriever, deadline);
            // Same request to the new leader
            followingClient.sendGetRequest("index.html", deadline.timeLeft());
            response = followingClient.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("html"));
            assertEquals(expected, response.getContent());
            // Simple overview request
            followingClient.sendGetRequest("/overview", deadline.timeLeft());
            response = followingClient.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("json"));
            assertTrue(response.getContent().contains("\"taskmanagers\":1") || response.getContent().contains("\"taskmanagers\":0"));
        }
    } finally {
        for (ActorSystem system : jobManagerSystem) {
            if (system != null) {
                system.shutdown();
            }
        }
        for (WebMonitor monitor : webMonitor) {
            monitor.stop();
        }
        for (LeaderRetrievalService lrs : leaderRetrievalServices) {
            lrs.stop();
        }
    }
}
Also used : ActorSystem(akka.actor.ActorSystem) Scanner(java.util.Scanner) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) ArrayList(java.util.ArrayList) TestingListener(org.apache.flink.runtime.leaderelection.TestingListener) HttpTestClient(org.apache.flink.runtime.webmonitor.testutils.HttpTestClient) TestingServer(org.apache.curator.test.TestingServer) Path(java.nio.file.Path) Deadline(scala.concurrent.duration.Deadline) Some(scala.Some) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) File(java.io.File) Test(org.junit.Test)
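
waitForLeaderNotification is a private helper of the test class and is not reproduced here; it presumably amounts to deadline-bounded polling of the retriever. A generic, hypothetical sketch of that pattern (the names below are illustrative, not Flink API):

import java.util.concurrent.Callable;

// Re-check a condition until it holds or the test deadline expires.
static void pollUntil(Callable<Boolean> condition, long deadlineMillis) throws Exception {
    while (!condition.call()) {
        if (System.currentTimeMillis() > deadlineMillis) {
            throw new AssertionError("condition not met before the deadline");
        }
        Thread.sleep(100);
    }
}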

Example 79 with TestingServer

use of org.apache.curator.test.TestingServer in project flink by apache.

the class WebRuntimeMonitorITCase method testLeaderNotAvailable.

@Test
public void testLeaderNotAvailable() throws Exception {
    final Deadline deadline = TestTimeout.fromNow();
    ActorSystem actorSystem = null;
    WebRuntimeMonitor webRuntimeMonitor = null;
    try (TestingServer zooKeeper = new TestingServer()) {
        File logDir = temporaryFolder.newFolder();
        Path logFile = Files.createFile(new File(logDir, "jobmanager.log").toPath());
        Files.createFile(new File(logDir, "jobmanager.out").toPath());
        final Configuration config = new Configuration();
        config.setInteger(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, 0);
        config.setString(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY, logFile.toString());
        config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
        config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeper.getConnectString());
        actorSystem = AkkaUtils.createDefaultActorSystem();
        LeaderRetrievalService leaderRetrievalService = mock(LeaderRetrievalService.class);
        webRuntimeMonitor = new WebRuntimeMonitor(config, leaderRetrievalService, actorSystem);
        webRuntimeMonitor.start("akka://schmakka");
        try (HttpTestClient client = new HttpTestClient("localhost", webRuntimeMonitor.getServerPort())) {
            client.sendGetRequest("index.html", deadline.timeLeft());
            HttpTestClient.SimpleHttpResponse response = client.getNextResponse();
            assertEquals(HttpResponseStatus.SERVICE_UNAVAILABLE, response.getStatus());
            assertEquals(MimeTypes.getMimeTypeForExtension("txt"), response.getType());
            assertTrue(response.getContent().contains("refresh"));
        }
    } finally {
        if (actorSystem != null) {
            actorSystem.shutdown();
        }
        if (webRuntimeMonitor != null) {
            webRuntimeMonitor.stop();
        }
    }
}
Also used : ActorSystem(akka.actor.ActorSystem) TestingServer(org.apache.curator.test.TestingServer) Path(java.nio.file.Path) HttpTestClient(org.apache.flink.runtime.webmonitor.testutils.HttpTestClient) Configuration(org.apache.flink.configuration.Configuration) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) Deadline(scala.concurrent.duration.Deadline) File(java.io.File) Test(org.junit.Test)

Example 80 with TestingServer

use of org.apache.curator.test.TestingServer in project hadoop by apache.

the class TestZKRMStateStoreZKClientConnections method setupZKServer.

@Before
public void setupZKServer() throws Exception {
    testingServer = new TestingServer();
    testingServer.start();
}
Also used : TestingServer(org.apache.curator.test.TestingServer) Before(org.junit.Before)
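
Only the setup is shown in this example. A hypothetical matching teardown, assuming a testingServer field as above, would simply close the server; TestingServer.close() stops the embedded ZooKeeper and removes its temporary data directory.

import org.apache.curator.test.TestingServer;
import org.junit.After;

@After
public void teardownZKServer() throws Exception {
    if (testingServer != null) {
        // close() stops the server and cleans up its temporary directory
        testingServer.close();
        testingServer = null;
    }
}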

Aggregations

TestingServer (org.apache.curator.test.TestingServer): 150
Before (org.junit.Before): 38
Test (org.junit.Test): 30
CuratorFramework (org.apache.curator.framework.CuratorFramework): 28
File (java.io.File): 27
Properties (java.util.Properties): 18
KafkaConfig (kafka.server.KafkaConfig): 15
RetryOneTime (org.apache.curator.retry.RetryOneTime): 15
Test (org.testng.annotations.Test): 15
ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 14
IOException (java.io.IOException): 13
Timing (org.apache.curator.test.Timing): 13
HashMap (java.util.HashMap): 12
ZkClient (org.I0Itec.zkclient.ZkClient): 12
ServerSocket (java.net.ServerSocket): 11
KafkaServerStartable (kafka.server.KafkaServerStartable): 11
ZkUtils (kafka.utils.ZkUtils): 11
ZkConnection (org.I0Itec.zkclient.ZkConnection): 11
BeforeClass (org.junit.BeforeClass): 11
TestingServerStarter (io.pravega.test.common.TestingServerStarter): 9