
Example 16 with ZkClient

Use of org.I0Itec.zkclient.ZkClient in project druid by druid-io.

From class ITKafkaTest, method testKafka.

@Test
public void testKafka() {
    LOG.info("Starting test: ITKafkaTest");
    // create topic
    try {
        int sessionTimeoutMs = 10000;
        int connectionTimeoutMs = 10000;
        String zkHosts = config.getZookeeperHosts();
        zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
        int numPartitions = 1;
        int replicationFactor = 1;
        Properties topicConfig = new Properties();
        AdminUtils.createTopic(zkClient, TOPIC_NAME, numPartitions, replicationFactor, topicConfig);
    } catch (TopicExistsException e) {
    // it's ok if the topic already exists
    } catch (Exception e) {
        throw new ISE(e, "could not create kafka topic");
    }
    // set up kafka producer
    Properties properties = new Properties();
    properties.put("bootstrap.servers", config.getKafkaHost());
    LOG.info("Kafka bootstrap.servers: [%s]", config.getKafkaHost());
    properties.put("acks", "all");
    properties.put("retries", "3");
    KafkaProducer<String, String> producer = new KafkaProducer<>(properties, new StringSerializer(), new StringSerializer());
    DateTimeZone zone = DateTimeZone.forID("UTC");
    // format for putting into events
    DateTimeFormatter event_fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
    // timestamp to put on events
    DateTime dt = new DateTime(zone);
    // timestamp of 1st event
    dtFirst = dt;
    // timestamp of last event
    dtLast = dt;
    // these are used to compute the expected aggregations
    int added = 0;
    int num_events = 10;
    // send data to kafka
    for (int i = 0; i < num_events; i++) {
        added += i;
        // construct the event to send
        String event = String.format(event_template, event_fmt.print(dt), i, 0, i);
        LOG.info("sending event: [%s]", event);
        try {
            // Send event to kafka
            producer.send(new ProducerRecord<String, String>(TOPIC_NAME, event)).get();
        } catch (Exception ioe) {
            throw Throwables.propagate(ioe);
        }
        dtLast = dt;
        dt = new DateTime(zone);
    }
    producer.close();
    String indexerSpec;
    // replace the %%...%% template placeholders in the indexer spec file
    try {
        LOG.info("indexerFile name: [%s]", INDEXER_FILE);
        indexerSpec = getTaskAsString(INDEXER_FILE)
            .replaceAll("%%DATASOURCE%%", DATASOURCE)
            .replaceAll("%%TOPIC%%", TOPIC_NAME)
            .replaceAll("%%ZOOKEEPER_SERVER%%", config.getZookeeperHosts())
            .replaceAll("%%GROUP_ID%%", Long.toString(System.currentTimeMillis()))
            .replaceAll("%%COUNT%%", Integer.toString(num_events));
        LOG.info("indexerFile: [%s]\n", indexerSpec);
    } catch (Exception e) {
        // log here so the message will appear in the console output
        LOG.error("could not read indexer file [%s]", INDEXER_FILE);
        throw new ISE(e, "could not read indexer file [%s]", INDEXER_FILE);
    }
    // start indexing task
    taskID = indexer.submitTask(indexerSpec);
    LOG.info("-------------SUBMITTED TASK");
    // wait for the task to finish
    indexer.waitUntilTaskCompletes(taskID, 20000, 30);
    // wait for segments to be handed off
    try {
        RetryUtil.retryUntil(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                return coordinator.areSegmentsLoaded(DATASOURCE);
            }
        }, true, 30000, 10, "Real-time generated segments loaded");
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    LOG.info("segments are present");
    segmentsExist = true;
    // put the timestamps into the query structure
    String query_response_template = null;
    InputStream is = ITKafkaTest.class.getResourceAsStream(QUERIES_FILE);
    if (null == is) {
        throw new ISE("could not open query file: %s", QUERIES_FILE);
    }
    try {
        query_response_template = IOUtils.toString(is, "UTF-8");
    } catch (IOException e) {
        throw new ISE(e, "could not read query file: %s", QUERIES_FILE);
    }
    String queryStr = query_response_template
        .replaceAll("%%DATASOURCE%%", DATASOURCE)
        .replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
        .replace("%%TIMEBOUNDARY_RESPONSE_MAXTIME%%", TIMESTAMP_FMT.print(dtLast))
        .replace("%%TIMEBOUNDARY_RESPONSE_MINTIME%%", TIMESTAMP_FMT.print(dtFirst))
        .replace("%%TIMESERIES_QUERY_START%%", INTERVAL_FMT.print(dtFirst))
        .replace("%%TIMESERIES_QUERY_END%%", INTERVAL_FMT.print(dtFirst.plusMinutes(MINUTES_TO_SEND + 2)))
        .replace("%%TIMESERIES_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
        .replace("%%TIMESERIES_ADDED%%", Integer.toString(added))
        .replace("%%TIMESERIES_NUMEVENTS%%", Integer.toString(num_events));
    // this query will probably be answered from the realtime task
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used : ZkClient(org.I0Itec.zkclient.ZkClient) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) InputStream(java.io.InputStream) IOException(java.io.IOException) Properties(java.util.Properties) TopicExistsException(kafka.common.TopicExistsException) DateTimeZone(org.joda.time.DateTimeZone) DateTime(org.joda.time.DateTime) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ISE(io.druid.java.util.common.ISE) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) DateTimeFormatter(org.joda.time.format.DateTimeFormatter) Test(org.testng.annotations.Test)
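
A minimal sketch distilling the topic-creation pattern above, assuming the Kafka 0.8.x-era AdminUtils that still accepts a ZkClient directly (later Kafka versions take a ZkUtils instead); the host string and topic name are placeholders.

import java.util.Properties;
import kafka.admin.AdminUtils;
import kafka.common.TopicExistsException;
import kafka.utils.ZKStringSerializer$;
import org.I0Itec.zkclient.ZkClient;

public class CreateTopicSketch {
    public static void main(String[] args) {
        // ZKStringSerializer$ makes ZkClient read/write znodes as the plain
        // UTF-8 strings Kafka expects; the default serializer would not work here.
        ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000, ZKStringSerializer$.MODULE$);
        try {
            AdminUtils.createTopic(zkClient, "example-topic", 1, 1, new Properties());
        } catch (TopicExistsException e) {
            // fine if a previous run already created the topic
        } finally {
            zkClient.close();
        }
    }
}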

Example 17 with ZkClient

Use of org.I0Itec.zkclient.ZkClient in project druid by druid-io.

From class TestKafkaExtractionCluster, method setUp.

@Before
public void setUp() throws Exception {
    zkTestServer = new TestingServer(-1, temporaryFolder.newFolder(), true);
    zkTestServer.start();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            zkTestServer.stop();
        }
    });
    zkClient = new ZkClient(zkTestServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            zkClient.close();
        }
    });
    if (!zkClient.exists("/kafka")) {
        zkClient.create("/kafka", null, CreateMode.PERSISTENT);
    }
    log.info("---------------------------Started ZK---------------------------");
    final String zkKafkaPath = "/kafka";
    final Properties serverProperties = new Properties();
    serverProperties.putAll(kafkaProperties);
    serverProperties.put("broker.id", "0");
    serverProperties.put("log.dir", temporaryFolder.newFolder().getAbsolutePath());
    serverProperties.put("log.cleaner.enable", "true");
    serverProperties.put("host.name", "127.0.0.1");
    serverProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
    serverProperties.put("zookeeper.session.timeout.ms", "10000");
    serverProperties.put("zookeeper.sync.time.ms", "200");
    kafkaConfig = new KafkaConfig(serverProperties);
    final long time = DateTime.parse("2015-01-01").getMillis();
    kafkaServer = new KafkaServer(kafkaConfig, new Time() {

        @Override
        public long milliseconds() {
            return time;
        }

        @Override
        public long nanoseconds() {
            return milliseconds() * 1_000_000;
        }

        @Override
        public void sleep(long ms) {
            try {
                Thread.sleep(ms);
            } catch (InterruptedException e) {
                throw Throwables.propagate(e);
            }
        }
    });
    kafkaServer.startup();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            kafkaServer.shutdown();
            kafkaServer.awaitShutdown();
        }
    });
    int sleepCount = 0;
    while (!kafkaServer.kafkaController().isActive()) {
        Thread.sleep(100);
        if (++sleepCount > 10) {
            throw new InterruptedException("Controller took too long to awaken");
        }
    }
    log.info("---------------------------Started Kafka Server---------------------------");
    final ZkClient zkClient = new ZkClient(zkTestServer.getConnectString() + zkKafkaPath, 10000, 10000, ZKStringSerializer$.MODULE$);
    try (final AutoCloseable autoCloseable = new AutoCloseable() {

        @Override
        public void close() throws Exception {
            if (zkClient.exists(zkKafkaPath)) {
                try {
                    zkClient.deleteRecursive(zkKafkaPath);
                } catch (org.I0Itec.zkclient.exception.ZkException ex) {
                    log.warn(ex, "error deleting %s zk node", zkKafkaPath);
                }
            }
            zkClient.close();
        }
    }) {
        final Properties topicProperties = new Properties();
        topicProperties.put("cleanup.policy", "compact");
        if (!AdminUtils.topicExists(zkClient, topicName)) {
            AdminUtils.createTopic(zkClient, topicName, 1, 1, topicProperties);
        }
        log.info("---------------------------Created topic---------------------------");
        Assert.assertTrue(AdminUtils.topicExists(zkClient, topicName));
    }
    final Properties kafkaProducerProperties = makeProducerProperties();
    final Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(kafkaProducerProperties));
    try (final AutoCloseable autoCloseable = new AutoCloseable() {

        @Override
        public void close() throws Exception {
            producer.close();
        }
    }) {
        producer.send(new KeyedMessage<>(topicName, StringUtils.toUtf8("abcdefg"), StringUtils.toUtf8("abcdefg")));
    }
    System.setProperty("druid.extensions.searchCurrentClassloader", "false");
    injector = Initialization.makeInjectorWithModules(GuiceInjectors.makeStartupInjector(), ImmutableList.of(new Module() {

        @Override
        public void configure(Binder binder) {
            binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test");
            binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0);
        }
    }, // These injections fail under IntelliJ but are required for maven
    new NamespaceExtractionModule(), new KafkaExtractionNamespaceModule()));
    mapper = injector.getInstance(ObjectMapper.class);
    log.info("--------------------------- placed default item via producer ---------------------------");
    final Map<String, String> consumerProperties = new HashMap<>(kafkaProperties);
    consumerProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
    consumerProperties.put("zookeeper.session.timeout.ms", "10000");
    consumerProperties.put("zookeeper.sync.time.ms", "200");
    final KafkaLookupExtractorFactory kafkaLookupExtractorFactory = new KafkaLookupExtractorFactory(null, topicName, consumerProperties);
    factory = (KafkaLookupExtractorFactory) mapper.readValue(mapper.writeValueAsString(kafkaLookupExtractorFactory), LookupExtractorFactory.class);
    Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaTopic(), factory.getKafkaTopic());
    Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaProperties(), factory.getKafkaProperties());
    factory.start();
    closer.register(new Closeable() {

        @Override
        public void close() throws IOException {
            factory.close();
        }
    });
    log.info("--------------------------- started rename manager ---------------------------");
}
Also used : NamespaceExtractionModule(io.druid.server.lookup.namespace.NamespaceExtractionModule) HashMap(java.util.HashMap) Closeable(java.io.Closeable) DateTime(org.joda.time.DateTime) Time(kafka.utils.Time) Properties(java.util.Properties) Binder(com.google.inject.Binder) ProducerConfig(kafka.producer.ProducerConfig) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) TestingServer(org.apache.curator.test.TestingServer) ZkClient(org.I0Itec.zkclient.ZkClient) IOException(java.io.IOException) KafkaServer(kafka.server.KafkaServer) Producer(kafka.javaapi.producer.Producer) Module(com.google.inject.Module) KafkaConfig(kafka.server.KafkaConfig) Before(org.junit.Before)
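
The explicit AutoCloseable wrappers above are needed because, in the zkclient versions of this era, ZkClient does not implement java.io.Closeable and so cannot go straight into try-with-resources. A hypothetical adapter, sketched below, restores that; the connect string is a placeholder.

import org.I0Itec.zkclient.ZkClient;

public class ClosingZkClient implements AutoCloseable {
    private final ZkClient delegate;

    public ClosingZkClient(String connectString) {
        this.delegate = new ZkClient(connectString, 10000, 10000);
    }

    public ZkClient get() {
        return delegate;
    }

    @Override
    public void close() {
        // releases the ZooKeeper session when try-with-resources exits
        delegate.close();
    }

    public static void main(String[] args) {
        try (ClosingZkClient zk = new ClosingZkClient("localhost:2181")) {
            if (!zk.get().exists("/kafka")) {
                zk.get().createPersistent("/kafka");
            }
        }
    }
}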

Example 18 with ZkClient

Use of org.I0Itec.zkclient.ZkClient in project databus by linkedin.

From class TestUtil, method startZkServer.

public static ZkServer startZkServer(String zkTestDataRootDir, int machineId, int port, int tickTime) throws IOException {
    File zkTestDataRootDirFile = new File(zkTestDataRootDir);
    zkTestDataRootDirFile.mkdirs();
    String dataPath = zkTestDataRootDir + "/" + machineId + "/" + port + "/data";
    String logPath = zkTestDataRootDir + "/" + machineId + "/" + port + "/log";
    FileUtils.deleteDirectory(new File(dataPath));
    FileUtils.deleteDirectory(new File(logPath));
    IDefaultNameSpace mockDefaultNameSpace = new IDefaultNameSpace() {

        @Override
        public void createDefaultNameSpace(ZkClient zkClient) {
        }
    };
    ZkServer zkServer = new ZkServer(dataPath, logPath, mockDefaultNameSpace, port, tickTime);
    zkServer.start();
    return zkServer;
}
Also used : ZkClient(org.I0Itec.zkclient.ZkClient) IDefaultNameSpace(org.I0Itec.zkclient.IDefaultNameSpace) File(java.io.File) ZkServer(org.I0Itec.zkclient.ZkServer)
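
Hypothetical usage of the helper above: start an embedded ZkServer for a test, connect a ZkClient to it, and tear both down. The data directory, port, and tick time are illustrative values.

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkServer;

public class StartZkServerSketch {
    public static void main(String[] args) throws Exception {
        // machineId 0, port 2181, tickTime 500 are arbitrary test values
        ZkServer zkServer = TestUtil.startZkServer("/tmp/zk-test-root", 0, 2181, 500);
        ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000);
        try {
            zkClient.createPersistent("/smoke-test");
        } finally {
            zkClient.close();
            zkServer.shutdown();
        }
    }
}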

Example 19 with ZkClient

Use of org.I0Itec.zkclient.ZkClient in project pinot by linkedin.

From class HelixBrokerStarterTest, method setUp.

@BeforeTest
public void setUp() throws Exception {
    _zookeeperInstance = ZkStarter.startLocalZkServer();
    _zkClient = new ZkClient(ZkStarter.DEFAULT_ZK_STR);
    final String instanceId = "localhost_helixController";
    _pinotResourceManager = new PinotHelixResourceManager(ZkStarter.DEFAULT_ZK_STR, HELIX_CLUSTER_NAME, instanceId, null, 10000L, true, /*isUpdateStateModel=*/ false);
    _pinotResourceManager.start();
    final String helixZkURL = HelixConfig.getAbsoluteZkPathForHelix(ZkStarter.DEFAULT_ZK_STR);
    _helixZkManager = HelixSetupUtils.setup(HELIX_CLUSTER_NAME, helixZkURL, instanceId, /*isUpdateStateModel=*/ false);
    _helixAdmin = _helixZkManager.getClusterManagmentTool();
    Thread.sleep(3000);
    final Configuration pinotHelixBrokerProperties = DefaultHelixBrokerConfig.getDefaultBrokerConf();
    pinotHelixBrokerProperties.addProperty(CommonConstants.Helix.KEY_OF_BROKER_QUERY_PORT, 8943);
    _helixBrokerStarter = new HelixBrokerStarter(HELIX_CLUSTER_NAME, ZkStarter.DEFAULT_ZK_STR, pinotHelixBrokerProperties);
    Thread.sleep(1000);
    ControllerRequestBuilderUtil.addFakeBrokerInstancesToAutoJoinHelixCluster(HELIX_CLUSTER_NAME, ZkStarter.DEFAULT_ZK_STR, 5, true);
    ControllerRequestBuilderUtil.addFakeDataInstancesToAutoJoinHelixCluster(HELIX_CLUSTER_NAME, ZkStarter.DEFAULT_ZK_STR, 1, true);
    final String tableName = "dining";
    JSONObject buildCreateOfflineTableV2JSON = ControllerRequestBuilderUtil.buildCreateOfflineTableJSON(tableName, null, null, 1);
    AbstractTableConfig config = AbstractTableConfig.init(buildCreateOfflineTableV2JSON.toString());
    _pinotResourceManager.addTable(config);
    for (int i = 1; i <= 5; i++) {
        addOneSegment(tableName);
        Thread.sleep(2000);
        final ExternalView externalView = _helixAdmin.getResourceExternalView(HELIX_CLUSTER_NAME, TableNameBuilder.OFFLINE_TABLE_NAME_BUILDER.forTable(tableName));
        Assert.assertEquals(externalView.getPartitionSet().size(), i);
    }
}
Also used : ZkClient(org.I0Itec.zkclient.ZkClient) ExternalView(org.apache.helix.model.ExternalView) PinotHelixResourceManager(com.linkedin.pinot.controller.helix.core.PinotHelixResourceManager) Configuration(org.apache.commons.configuration.Configuration) JSONObject(org.json.JSONObject) HelixBrokerStarter(com.linkedin.pinot.broker.broker.helix.HelixBrokerStarter) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) BeforeTest(org.testng.annotations.BeforeTest)
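
Note that the single-argument constructor used above falls back to zkclient's default SerializableSerializer, so znode payloads are standard Java-serialized objects. A minimal sketch of writing and reading a value under that default; the host and path are placeholders.

import org.I0Itec.zkclient.ZkClient;

public class ReadWriteSketch {
    public static void main(String[] args) {
        ZkClient zkClient = new ZkClient("localhost:2181");
        try {
            if (!zkClient.exists("/demo")) {
                zkClient.createPersistent("/demo");
            }
            zkClient.writeData("/demo", "hello");      // Java-serialized on write
            String value = zkClient.readData("/demo"); // deserialized on read
            System.out.println(value);
        } finally {
            zkClient.close();
        }
    }
}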

Example 20 with ZkClient

Use of org.I0Itec.zkclient.ZkClient in project pinot by linkedin.

From class SegmentAssignmentStrategyTest, method setup.

@BeforeTest
public void setup() throws Exception {
    _zookeeperInstance = ZkStarter.startLocalZkServer();
    _zkClient = new ZkClient(ZK_SERVER);
    final String zkPath = "/" + HELIX_CLUSTER_NAME;
    if (_zkClient.exists(zkPath)) {
        _zkClient.deleteRecursive(zkPath);
    }
    final String instanceId = "localhost_helixController";
    _pinotHelixResourceManager = new PinotHelixResourceManager(ZK_SERVER, HELIX_CLUSTER_NAME, instanceId, null, 10000L, true, /*isUpdateStateModel=*/ false);
    _pinotHelixResourceManager.start();
    final String helixZkURL = HelixConfig.getAbsoluteZkPathForHelix(ZK_SERVER);
    _helixZkManager = HelixSetupUtils.setup(HELIX_CLUSTER_NAME, helixZkURL, instanceId, /*isUpdateStateModel=*/ false);
    _helixAdmin = _helixZkManager.getClusterManagmentTool();
    ControllerRequestBuilderUtil.addFakeDataInstancesToAutoJoinHelixCluster(HELIX_CLUSTER_NAME, ZK_SERVER, _numServerInstance, true);
    ControllerRequestBuilderUtil.addFakeBrokerInstancesToAutoJoinHelixCluster(HELIX_CLUSTER_NAME, ZK_SERVER, _numBrokerInstance, true);
    Thread.sleep(3000);
    Assert.assertEquals(_helixAdmin.getInstancesInClusterWithTag(HELIX_CLUSTER_NAME, "DefaultTenant_OFFLINE").size(), _numServerInstance);
    Assert.assertEquals(_helixAdmin.getInstancesInClusterWithTag(HELIX_CLUSTER_NAME, "DefaultTenant_REALTIME").size(), _numServerInstance);
    Assert.assertEquals(_helixAdmin.getInstancesInClusterWithTag(HELIX_CLUSTER_NAME, "DefaultTenant_BROKER").size(), _numBrokerInstance);
}
Also used : ZkClient(org.I0Itec.zkclient.ZkClient) PinotHelixResourceManager(com.linkedin.pinot.controller.helix.core.PinotHelixResourceManager) BeforeTest(org.testng.annotations.BeforeTest)
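
The exists/deleteRecursive pair above is the usual idempotent cleanup idiom for tests: wipe a cluster subtree left behind by a previous run before recreating it. A minimal sketch of that idiom under an assumed path and host.

import org.I0Itec.zkclient.ZkClient;

public class CleanupSketch {
    static void resetSubtree(ZkClient zkClient, String root) {
        if (zkClient.exists(root)) {
            zkClient.deleteRecursive(root);   // removes the node and all its children
        }
        zkClient.createPersistent(root, true); // recreate, creating parents as needed
    }

    public static void main(String[] args) {
        ZkClient zkClient = new ZkClient("localhost:2181");
        try {
            resetSubtree(zkClient, "/ExampleCluster");
        } finally {
            zkClient.close();
        }
    }
}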

Aggregations

ZkClient (org.I0Itec.zkclient.ZkClient): 28
Properties (java.util.Properties): 8
ZkUtils (kafka.utils.ZkUtils): 7
ZkConnection (org.I0Itec.zkclient.ZkConnection): 6
Before (org.junit.Before): 6
File (java.io.File): 4
IOException (java.io.IOException): 4
InputStream (java.io.InputStream): 4
KafkaConfig (kafka.server.KafkaConfig): 4
DateTime (org.joda.time.DateTime): 3
PinotHelixResourceManager (com.linkedin.pinot.controller.helix.core.PinotHelixResourceManager): 2
URL (com.weibo.api.motan.rpc.URL): 2
ISE (io.druid.java.util.common.ISE): 2
TopicExistsException (kafka.common.TopicExistsException): 2
MockTime (kafka.utils.MockTime): 2
Time (kafka.utils.Time): 2
EmbeddedZookeeper (kafka.zk.EmbeddedZookeeper): 2
IDefaultNameSpace (org.I0Itec.zkclient.IDefaultNameSpace): 2
ZkServer (org.I0Itec.zkclient.ZkServer): 2
TestingServer (org.apache.curator.test.TestingServer): 2