Example 26 with TestingServer

Use of org.apache.curator.test.TestingServer in project storm by apache.

From class DynamicBrokersReaderTest, method setUp:

@Before
public void setUp() throws Exception {
    // Start an in-process ZooKeeper; getConnectString() returns its host:port.
    server = new TestingServer();
    String connectionString = server.getConnectString();
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
    ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
    zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
    dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
    Map<String, Object> conf2 = new HashMap<>();
    conf2.putAll(conf);
    conf2.put("kafka.topic.wildcard.match", true);
    wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
    zookeeper.start();
}
Also used: TestingServer (org.apache.curator.test.TestingServer), HashMap (java.util.HashMap), ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry), Before (org.junit.Before)
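
The matching teardown is not part of this excerpt. A minimal sketch, assuming the fields above are the only resources opened (the method name is illustrative):

@After
public void tearDown() throws Exception {
    // Close the Curator client before stopping the ZooKeeper it points at.
    zookeeper.close();
    // TestingServer implements Closeable; close() stops the server and deletes its temp data.
    server.close();
}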

Example 27 with TestingServer

Use of org.apache.curator.test.TestingServer in project storm by apache.

From class ZkCoordinatorTest, method setUp:

@Before
public void setUp() throws Exception {
    MockitoAnnotations.initMocks(this);
    server = new TestingServer();
    String connectionString = server.getConnectString();
    ZkHosts hosts = new ZkHosts(connectionString);
    hosts.refreshFreqSecs = 1;
    spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
    Map conf = buildZookeeperConfig(server);
    state = new ZkState(conf);
    simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
    when(dynamicPartitionConnections.register(any(Broker.class), any(String.class), anyInt())).thenReturn(simpleConsumer);
}
Also used: TestingServer (org.apache.curator.test.TestingServer), SimpleConsumer (kafka.javaapi.consumer.SimpleConsumer), Before (org.junit.Before)
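
buildZookeeperConfig is a private helper of the same test class and is not shown here. A plausible reconstruction, assuming ZkState reads storm's transactional ZooKeeper keys (the specific timeout and retry values are assumptions):

private Map buildZookeeperConfig(TestingServer server) {
    Map<String, Object> conf = new HashMap<>();
    // Point storm's transactional ZK state at the in-process TestingServer.
    conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
    conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 30);
    return conf;
}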

Example 28 with TestingServer

Use of org.apache.curator.test.TestingServer in project druid by druid-io.

From class CuratorTestBase, method setupServerAndCurator:

protected void setupServerAndCurator() throws Exception {
    server = new TestingServer();
    timing = new Timing();
    curator = CuratorFrameworkFactory.builder()
            .connectString(server.getConnectString())
            .sessionTimeoutMs(timing.session())
            .connectionTimeoutMs(timing.connection())
            .retryPolicy(new RetryOneTime(1))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
            .build();
}
Also used: TestingServer (org.apache.curator.test.TestingServer), RetryOneTime (org.apache.curator.retry.RetryOneTime), Timing (org.apache.curator.test.Timing), PotentiallyGzippedCompressionProvider (io.druid.curator.PotentiallyGzippedCompressionProvider)
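
Note that builder().build() only assembles the client; neither client startup nor teardown appears in this excerpt. A minimal counterpart sketch, assuming no other resources are held (the method name is illustrative):

protected void tearDownServerAndCurator() throws IOException {
    // Close in reverse order of creation: client first, then the test server.
    curator.close();
    server.close();
}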

Example 29 with TestingServer

Use of org.apache.curator.test.TestingServer in project druid by druid-io.

From class TestKafkaExtractionCluster, method setUp:

@Before
public void setUp() throws Exception {
    // Port -1 lets Curator pick a free ephemeral port; 'true' starts the server in the constructor.
    zkTestServer = new TestingServer(-1, temporaryFolder.newFolder(), true);
    zkTestServer.start();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            zkTestServer.stop();
        }
    });
    zkClient = new ZkClient(zkTestServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            zkClient.close();
        }
    });
    if (!zkClient.exists("/kafka")) {
        zkClient.create("/kafka", null, CreateMode.PERSISTENT);
    }
    log.info("---------------------------Started ZK---------------------------");
    final String zkKafkaPath = "/kafka";
    final Properties serverProperties = new Properties();
    serverProperties.putAll(kafkaProperties);
    serverProperties.put("broker.id", "0");
    serverProperties.put("log.dir", temporaryFolder.newFolder().getAbsolutePath());
    serverProperties.put("log.cleaner.enable", "true");
    serverProperties.put("host.name", "127.0.0.1");
    serverProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
    serverProperties.put("zookeeper.session.timeout.ms", "10000");
    serverProperties.put("zookeeper.sync.time.ms", "200");
    kafkaConfig = new KafkaConfig(serverProperties);
    // Pin the broker clock to a fixed instant (the Time implementation below returns this constant).
    final long time = DateTime.parse("2015-01-01").getMillis();
    kafkaServer = new KafkaServer(kafkaConfig, new Time() {

        @Override
        public long milliseconds() {
            return time;
        }

        @Override
        public long nanoseconds() {
            return milliseconds() * 1_000_000;
        }

        @Override
        public void sleep(long ms) {
            try {
                Thread.sleep(ms);
            } catch (InterruptedException e) {
                throw Throwables.propagate(e);
            }
        }
    });
    kafkaServer.startup();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            kafkaServer.shutdown();
            kafkaServer.awaitShutdown();
        }
    });
    // Poll (up to ~1 second) until this broker wins controller election.
    int sleepCount = 0;
    while (!kafkaServer.kafkaController().isActive()) {
        Thread.sleep(100);
        if (++sleepCount > 10) {
            throw new InterruptedException("Controller took too long to awaken");
        }
    }
    log.info("---------------------------Started Kafka Server---------------------------");
    final ZkClient zkClient = new ZkClient(zkTestServer.getConnectString() + zkKafkaPath, 10000, 10000, ZKStringSerializer$.MODULE$);
    try (final AutoCloseable autoCloseable = new AutoCloseable() {

        @Override
        public void close() throws Exception {
            if (zkClient.exists(zkKafkaPath)) {
                try {
                    zkClient.deleteRecursive(zkKafkaPath);
                } catch (org.I0Itec.zkclient.exception.ZkException ex) {
                    log.warn(ex, "error deleting %s zk node", zkKafkaPath);
                }
            }
            zkClient.close();
        }
    }) {
        final Properties topicProperties = new Properties();
        topicProperties.put("cleanup.policy", "compact");
        if (!AdminUtils.topicExists(zkClient, topicName)) {
            AdminUtils.createTopic(zkClient, topicName, 1, 1, topicProperties);
        }
        log.info("---------------------------Created topic---------------------------");
        Assert.assertTrue(AdminUtils.topicExists(zkClient, topicName));
    }
    final Properties kafkaProducerProperties = makeProducerProperties();
    final Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(kafkaProducerProperties));
    try (final AutoCloseable autoCloseable = new AutoCloseable() {

        @Override
        public void close() throws Exception {
            producer.close();
        }
    }) {
        producer.send(new KeyedMessage<>(topicName, StringUtils.toUtf8("abcdefg"), StringUtils.toUtf8("abcdefg")));
    }
    System.setProperty("druid.extensions.searchCurrentClassloader", "false");
    injector = Initialization.makeInjectorWithModules(GuiceInjectors.makeStartupInjector(), ImmutableList.of(new Module() {

        @Override
        public void configure(Binder binder) {
            binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test");
            binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0);
        }
    }, // These injections fail under IntelliJ but are required for maven
    new NamespaceExtractionModule(), new KafkaExtractionNamespaceModule()));
    mapper = injector.getInstance(ObjectMapper.class);
    log.info("--------------------------- placed default item via producer ---------------------------");
    final Map<String, String> consumerProperties = new HashMap<>(kafkaProperties);
    consumerProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
    consumerProperties.put("zookeeper.session.timeout.ms", "10000");
    consumerProperties.put("zookeeper.sync.time.ms", "200");
    final KafkaLookupExtractorFactory kafkaLookupExtractorFactory = new KafkaLookupExtractorFactory(null, topicName, consumerProperties);
    factory = (KafkaLookupExtractorFactory) mapper.readValue(mapper.writeValueAsString(kafkaLookupExtractorFactory), LookupExtractorFactory.class);
    Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaTopic(), factory.getKafkaTopic());
    Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaProperties(), factory.getKafkaProperties());
    factory.start();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            factory.close();
        }
    });
    log.info("--------------------------- started rename manager ---------------------------");
}
Also used: NamespaceExtractionModule (io.druid.server.lookup.namespace.NamespaceExtractionModule), HashMap (java.util.HashMap), Closeable (java.io.Closeable), DateTime (org.joda.time.DateTime), Time (kafka.utils.Time), Properties (java.util.Properties), Binder (com.google.inject.Binder), ProducerConfig (kafka.producer.ProducerConfig), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), TestingServer (org.apache.curator.test.TestingServer), ZkClient (org.I0Itec.zkclient.ZkClient), IOException (java.io.IOException), KafkaServer (kafka.server.KafkaServer), Producer (kafka.javaapi.producer.Producer), Module (com.google.inject.Module), KafkaConfig (kafka.server.KafkaConfig), Before (org.junit.Before)
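
The recurring pattern in this setup (open a resource, then immediately closer.register(...) its shutdown hook) leans on Guava's Closer, which closes registered Closeables in reverse registration order. A minimal teardown sketch under that assumption (the closer field itself is not shown in the excerpt):

@After
public void tearDown() throws IOException {
    // Closer closes LIFO: factory, kafkaServer, zkClient, then zkTestServer.
    closer.close();
}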

Example 30 with TestingServer

Use of org.apache.curator.test.TestingServer in project druid by druid-io.

From class OverlordTest, method setupServerAndCurator:

private void setupServerAndCurator() throws Exception {
    server = new TestingServer();
    timing = new Timing();
    curator = CuratorFrameworkFactory.builder()
            .connectString(server.getConnectString())
            .sessionTimeoutMs(timing.session())
            .connectionTimeoutMs(timing.connection())
            .retryPolicy(new RetryOneTime(1))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
            .build();
}
Also used: TestingServer (org.apache.curator.test.TestingServer), RetryOneTime (org.apache.curator.retry.RetryOneTime), Timing (org.apache.curator.test.Timing), PotentiallyGzippedCompressionProvider (io.druid.curator.PotentiallyGzippedCompressionProvider)
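
As in Example 28, the builder only constructs the client; it still has to be started before use. A typical follow-up using the standard Curator API (not part of the excerpt):

curator.start();
// Block until the ZooKeeper session is actually established.
curator.blockUntilConnected();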

Aggregations

TestingServer (org.apache.curator.test.TestingServer): 60
Before (org.junit.Before): 23
Test (org.junit.Test): 13
TestingServerStarter (io.pravega.test.common.TestingServerStarter): 9
Cleanup (lombok.Cleanup): 8
BeforeClass (org.junit.BeforeClass): 8
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 7
Controller (io.pravega.client.stream.impl.Controller): 7
StreamSegmentStore (io.pravega.segmentstore.contracts.StreamSegmentStore): 7
PravegaConnectionListener (io.pravega.segmentstore.server.host.handler.PravegaConnectionListener): 7
ServiceBuilder (io.pravega.segmentstore.server.store.ServiceBuilder): 7
File (java.io.File): 7
IOException (java.io.IOException): 7
HashMap (java.util.HashMap): 7
CuratorFramework (org.apache.curator.framework.CuratorFramework): 7
ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 7
RetryOneTime (org.apache.curator.retry.RetryOneTime): 6
ConnectionFactoryImpl (io.pravega.client.netty.impl.ConnectionFactoryImpl): 5
Stream (io.pravega.client.stream.Stream): 5
StreamImpl (io.pravega.client.stream.impl.StreamImpl): 5