Example 11 with KafkaProducer

Use of org.apache.kafka.clients.producer.KafkaProducer in project opennms by OpenNMS.

The class KafkaMessageProducer, method produce().

public void produce() {
    Properties props = new Properties();
    props.put("bootstrap.servers", getBootStrapServer());
    props.put("acks", "all");              // wait for the full in-sync replica set to acknowledge each record
    props.put("retries", 0);               // fail fast rather than retry
    props.put("batch.size", 16384);        // maximum batch size, in bytes
    props.put("linger.ms", 1);             // wait up to 1 ms to fill a batch
    props.put("buffer.memory", 33554432);  // 32 MB of buffer for unsent records
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 1000; i++) {
        producer.send(new ProducerRecord<>("USER_TOPIC", Integer.toString(i), "This is " + i + " Message"));
    }
    LOGGER.info("produced 1000 messages in USER_TOPIC");
    // close() flushes any records still buffered by batch.size/linger.ms
    producer.close();
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Properties(java.util.Properties)
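
The snippet above never learns whether a send failed, and close() is skipped if the loop throws. A minimal hardened variant of the same pattern: pass a Callback to send() and manage the producer with try-with-resources (the broker address here is a hypothetical stand-in for getBootStrapServer()).

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker address
        props.put("acks", "all");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        // try-with-resources guarantees close(), which flushes buffered records
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 1000; i++) {
                producer.send(new ProducerRecord<>("USER_TOPIC", Integer.toString(i), "This is " + i + " Message"),
                        (metadata, exception) -> {
                            // runs on the producer I/O thread once the send completes
                            if (exception != null) {
                                System.err.println("send failed: " + exception.getMessage());
                            }
                        });
            }
        }
    }
}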

Example 12 with KafkaProducer

Use of org.apache.kafka.clients.producer.KafkaProducer in project metron by apache.

The class LoadGenerator, method main().

public static void main(String[] args) throws Exception {
    CommandLine cli = LoadOptions.parse(new PosixParser(), args);
    EnumMap<LoadOptions, Optional<Object>> evaluatedArgs = LoadOptions.createConfig(cli);
    Map<String, Object> kafkaConfig = new HashMap<>();
    kafkaConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    kafkaConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    kafkaConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    kafkaConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    if (LoadOptions.ZK.has(cli)) {
        String zkQuorum = (String) evaluatedArgs.get(LoadOptions.ZK).get();
        kafkaConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, Joiner.on(",").join(KafkaUtils.INSTANCE.getBrokersFromZookeeper(zkQuorum)));
    }
    String groupId = evaluatedArgs.get(LoadOptions.CONSUMER_GROUP).get().toString();
    System.out.println("Consumer Group: " + groupId);
    kafkaConfig.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    if (LoadOptions.KAFKA_CONFIG.has(cli)) {
        kafkaConfig.putAll((Map<String, Object>) evaluatedArgs.get(LoadOptions.KAFKA_CONFIG).get());
    }
    kafkaProducer = ThreadLocal.withInitial(() -> new KafkaProducer<>(kafkaConfig));
    int numThreads = (int) evaluatedArgs.get(LoadOptions.NUM_THREADS).get();
    System.out.println("Thread pool size: " + numThreads);
    pool = Executors.newFixedThreadPool(numThreads);
    Optional<Object> eps = evaluatedArgs.get(LoadOptions.EPS);
    Optional<Object> outputTopic = evaluatedArgs.get(LoadOptions.OUTPUT_TOPIC);
    Optional<Object> monitorTopic = evaluatedArgs.get(LoadOptions.MONITOR_TOPIC);
    long sendDelta = (long) evaluatedArgs.get(LoadOptions.SEND_DELTA).get();
    long monitorDelta = (long) evaluatedArgs.get(LoadOptions.MONITOR_DELTA).get();
    if ((eps.isPresent() && outputTopic.isPresent()) || monitorTopic.isPresent()) {
        Timer timer = new Timer(false);
        long startTimeMs = System.currentTimeMillis();
        if (outputTopic.isPresent() && eps.isPresent()) {
            List<String> templates = (List<String>) evaluatedArgs.get(LoadOptions.TEMPLATE).get();
            if (templates.isEmpty()) {
                System.out.println("Empty templates, so nothing to do.");
                return;
            }
            Optional<Object> biases = evaluatedArgs.get(LoadOptions.BIASED_SAMPLE);
            Sampler sampler = new UnbiasedSampler();
            if (biases.isPresent()) {
                sampler = new BiasedSampler((List<Map.Entry<Integer, Integer>>) biases.get(), templates.size());
            }
            MessageGenerator generator = new MessageGenerator(templates, sampler);
            Long targetLoad = (Long) eps.get();
            int periodsPerSecond = (int) (1000 / sendDelta);
            long messagesPerPeriod = targetLoad / periodsPerSecond;
            String outputTopicStr = (String) outputTopic.get();
            System.out.println("Generating data to " + outputTopicStr + " at " + targetLoad + " events per second");
            System.out.println("Sending " + messagesPerPeriod + " messages to " + outputTopicStr + " every " + sendDelta + "ms");
            timer.scheduleAtFixedRate(new SendToKafka(outputTopicStr, messagesPerPeriod, numThreads, generator, pool, numSent, kafkaProducer), 0, sendDelta);
        }
        List<AbstractMonitor> monitors = new ArrayList<>();
        if (outputTopic.isPresent() && monitorTopic.isPresent()) {
            System.out.println("Monitoring " + monitorTopic.get() + " every " + monitorDelta + " ms");
            monitors.add(new EPSGeneratedMonitor(outputTopic, numSent));
            monitors.add(new EPSThroughputWrittenMonitor(monitorTopic, kafkaConfig));
        } else if (outputTopic.isPresent() && !monitorTopic.isPresent()) {
            System.out.println("Monitoring " + outputTopic.get() + " every " + monitorDelta + " ms");
            monitors.add(new EPSGeneratedMonitor(outputTopic, numSent));
            monitors.add(new EPSThroughputWrittenMonitor(outputTopic, kafkaConfig));
        } else if (!outputTopic.isPresent() && monitorTopic.isPresent()) {
            System.out.println("Monitoring " + monitorTopic.get() + " every " + monitorDelta + " ms");
            monitors.add(new EPSThroughputWrittenMonitor(monitorTopic, kafkaConfig));
        } else if (!outputTopic.isPresent() && !monitorTopic.isPresent()) {
            System.out.println("You have not specified an output topic or a monitoring topic, so I have nothing to do here.");
        }
        int lookback = (int) evaluatedArgs.get(LoadOptions.SUMMARY_LOOKBACK).get();
        if (lookback > 0) {
            System.out.println("Summarizing over the last " + lookback + " monitoring periods (" + lookback * monitorDelta + "ms)");
        } else {
            System.out.println("Turning off summarization.");
        }
        final CSVWriter csvWriter = new CSVWriter((File) evaluatedArgs.get(LoadOptions.CSV).orElse(null));
        Writer writer = new Writer(monitors, lookback, new ArrayList<Consumer<Writable>>() {

            {
                add(new ConsoleWriter());
                add(csvWriter);
            }
        });
        timer.scheduleAtFixedRate(new MonitorTask(writer), 0, monitorDelta);
        Optional<Object> timeLimit = evaluatedArgs.get(LoadOptions.TIME_LIMIT);
        if (timeLimit.isPresent()) {
            System.out.println("Ending in " + timeLimit.get() + " ms.");
            timer.schedule(new TimerTask() {

                @Override
                public void run() {
                    timer.cancel();
                    long durationS = (System.currentTimeMillis() - startTimeMs) / 1000;
                    System.out.println("\nGenerated " + numSent.get() + " in " + durationS + " seconds.");
                    csvWriter.close();
                    System.exit(0);
                }
            }, (Long) timeLimit.get());
        }
    }
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) HashMap(java.util.HashMap) PosixParser(org.apache.commons.cli.PosixParser) ArrayList(java.util.ArrayList) ConsoleWriter(org.apache.metron.performance.load.monitor.writers.ConsoleWriter) CSVWriter(org.apache.metron.performance.load.monitor.writers.CSVWriter) MonitorTask(org.apache.metron.performance.load.monitor.MonitorTask) UnbiasedSampler(org.apache.metron.performance.sampler.UnbiasedSampler) Consumer(java.util.function.Consumer) TimerTask(java.util.TimerTask) List(java.util.List) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Optional(java.util.Optional) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) BiasedSampler(org.apache.metron.performance.sampler.BiasedSampler) EPSGeneratedMonitor(org.apache.metron.performance.load.monitor.EPSGeneratedMonitor) CommandLine(org.apache.commons.cli.CommandLine) Timer(java.util.Timer) AbstractMonitor(org.apache.metron.performance.load.monitor.AbstractMonitor) Sampler(org.apache.metron.performance.sampler.Sampler) AtomicLong(java.util.concurrent.atomic.AtomicLong) EPSThroughputWrittenMonitor(org.apache.metron.performance.load.monitor.EPSThroughputWrittenMonitor) Map(java.util.Map) EnumMap(java.util.EnumMap) Writer(org.apache.metron.performance.load.monitor.writers.Writer)
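
The rate control above reduces to one computation: with a scheduling period of sendDelta milliseconds, the timer fires 1000 / sendDelta times per second, so each firing must emit targetLoad / periodsPerSecond messages. A minimal sketch of the same fixed-rate pattern with hypothetical numbers, using a ScheduledExecutorService in place of the Timer in the original:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class FixedRateSketch {
    public static void main(String[] args) {
        long targetEps = 10_000;  // hypothetical events-per-second target (LoadOptions.EPS)
        long sendDeltaMs = 100;   // hypothetical scheduling period (LoadOptions.SEND_DELTA)
        long periodsPerSecond = 1000 / sendDeltaMs;            // 10 firings per second
        long messagesPerPeriod = targetEps / periodsPerSecond; // 1,000 messages per firing
        AtomicLong numSent = new AtomicLong();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(() -> {
            for (long i = 0; i < messagesPerPeriod; i++) {
                numSent.incrementAndGet(); // a real task would hand each message to a KafkaProducer here
            }
        }, 0, sendDeltaMs, TimeUnit.MILLISECONDS);
    }
}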

Example 13 with KafkaProducer

Use of org.apache.kafka.clients.producer.KafkaProducer in project metron by apache.

The class SendToKafkaTest, method testWritesCorrectNumber().

@Test
public void testWritesCorrectNumber() {
    ExecutorService executor = ForkJoinPool.commonPool();
    AtomicLong numSent = new AtomicLong(0);
    long expectedSent = 100;
    SendToKafka sender = new SendToKafka(null, expectedSent, 10, () -> "msg", executor, numSent, ThreadLocal.withInitial(() -> null)) {

        @Override
        protected Future<?> sendToKafka(KafkaProducer producer, String kafkaTopic, String message) {
            Assert.assertEquals("msg", message); // JUnit's assertEquals takes (expected, actual)
            return ForkJoinPool.commonPool().submit(() -> {
                numSent.incrementAndGet();
            });
        }
    };
    sender.run();
    Assert.assertEquals(expectedSent, numSent.get());
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) AtomicLong(java.util.concurrent.atomic.AtomicLong) ExecutorService(java.util.concurrent.ExecutorService) Test(org.junit.Test)
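
Subclassing SendToKafka works, but kafka-clients also ships org.apache.kafka.clients.producer.MockProducer, which records every send in memory and needs no running broker. A minimal sketch (topic name and payload are hypothetical):

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerSketch {
    public static void main(String[] args) {
        // autoComplete=true resolves each send's Future immediately
        MockProducer<String, String> producer =
                new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        producer.send(new ProducerRecord<>("test-topic", "key", "msg"));
        // history() returns every record sent, so a test can assert on it directly
        System.out.println("records sent: " + producer.history().size()); // prints 1
    }
}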

Example 14 with KafkaProducer

Use of org.apache.kafka.clients.producer.KafkaProducer in project apex-malhar by apache.

The class EmbeddedKafka, method publish().

public void publish(String topic, List<String> messages) {
    Properties producerProps = new Properties();
    producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
    producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    try (KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps)) {
        for (String message : messages) {
            ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, message.getBytes(StandardCharsets.UTF_8));
            producer.send(data);
        }
    }
    List<KafkaServer> servers = new ArrayList<KafkaServer>();
    servers.add(kafkaServer);
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) KafkaServer(kafka.server.KafkaServer) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ArrayList(java.util.ArrayList) Properties(java.util.Properties)
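
To confirm the embedded broker actually received the batch, a KafkaConsumer pointed at the same topic can read it back. A minimal sketch, with the broker address and topic name assumed; note that poll(Duration) requires kafka-clients 2.0 or later (older clients use the long-millis overload):

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ReadBackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // hypothetical embedded broker address
        props.setProperty("group.id", "verify-group");            // hypothetical consumer group
        props.setProperty("auto.offset.reset", "earliest");       // start from the beginning of the topic
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("test-topic")); // hypothetical topic
            ConsumerRecords<Integer, byte[]> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<Integer, byte[]> record : records) {
                System.out.println(new String(record.value(), StandardCharsets.UTF_8));
            }
        }
    }
}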

Example 15 with KafkaProducer

Use of org.apache.kafka.clients.producer.KafkaProducer in project druid by druid-io.

The class ITKafkaIndexingServiceTest, method testKafka().

@Test
public void testKafka() {
    LOG.info("Starting test: ITKafkaIndexingServiceTest");
    // create topic
    try {
        int sessionTimeoutMs = 10000;
        int connectionTimeoutMs = 10000;
        String zkHosts = config.getZookeeperHosts();
        zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
        int numPartitions = 4;
        int replicationFactor = 1;
        Properties topicConfig = new Properties();
        AdminUtils.createTopic(zkClient, TOPIC_NAME, numPartitions, replicationFactor, topicConfig);
    } catch (TopicExistsException e) {
    // it's ok if the topic already exists
    } catch (Exception e) {
        throw new ISE(e, "could not create kafka topic");
    }
    String spec;
    try {
        LOG.info("supervisorSpec name: [%s]", INDEXER_FILE);
        spec = getTaskAsString(INDEXER_FILE).replaceAll("%%DATASOURCE%%", DATASOURCE).replaceAll("%%TOPIC%%", TOPIC_NAME).replaceAll("%%KAFKA_BROKER%%", config.getKafkaHost());
        LOG.info("supervisorSpec: [%s]\n", spec);
    } catch (Exception e) {
        LOG.error("could not read file [%s]", INDEXER_FILE);
        throw new ISE(e, "could not read file [%s]", INDEXER_FILE);
    }
    // start supervisor
    supervisorId = indexer.submitSupervisor(spec);
    LOG.info("Submitted supervisor");
    // set up kafka producer
    Properties properties = new Properties();
    properties.put("bootstrap.servers", config.getKafkaHost());
    LOG.info("Kafka bootstrap.servers: [%s]", config.getKafkaHost());
    properties.put("acks", "all");
    properties.put("retries", "3");
    KafkaProducer<String, String> producer = new KafkaProducer<>(properties, new StringSerializer(), new StringSerializer());
    DateTimeZone zone = DateTimeZone.forID("UTC");
    // format for putting into events
    DateTimeFormatter event_fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
    // timestamp to put on events
    DateTime dt = new DateTime(zone);
    // timestamp of 1st event
    dtFirst = dt;
    // timestamp of last event
    dtLast = dt;
    // these are used to compute the expected aggregations
    int added = 0;
    int num_events = 0;
    // send data to kafka
    while (num_events < NUM_EVENTS_TO_SEND) {
        num_events++;
        added += num_events;
        // construct the event to send
        String event = String.format(event_template, event_fmt.print(dt), num_events, 0, num_events);
        LOG.info("sending event: [%s]", event);
        try {
            producer.send(new ProducerRecord<String, String>(TOPIC_NAME, event)).get();
        } catch (Exception ioe) {
            throw Throwables.propagate(ioe);
        }
        dtLast = dt;
        dt = new DateTime(zone);
    }
    producer.close();
    LOG.info("Waiting for [%s] millis for Kafka indexing tasks to consume events", WAIT_TIME_MILLIS);
    try {
        Thread.sleep(WAIT_TIME_MILLIS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
    InputStream is = ITKafkaIndexingServiceTest.class.getResourceAsStream(QUERIES_FILE);
    if (null == is) {
        throw new ISE("could not open query file: %s", QUERIES_FILE);
    }
    // put the timestamps into the query structure
    String query_response_template;
    try {
        query_response_template = IOUtils.toString(is, "UTF-8");
    } catch (IOException e) {
        throw new ISE(e, "could not read query file: %s", QUERIES_FILE);
    }
    String queryStr = query_response_template.replaceAll("%%DATASOURCE%%", DATASOURCE).replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst)).replace("%%TIMEBOUNDARY_RESPONSE_MAXTIME%%", TIMESTAMP_FMT.print(dtLast)).replace("%%TIMEBOUNDARY_RESPONSE_MINTIME%%", TIMESTAMP_FMT.print(dtFirst)).replace("%%TIMESERIES_QUERY_START%%", INTERVAL_FMT.print(dtFirst)).replace("%%TIMESERIES_QUERY_END%%", INTERVAL_FMT.print(dtLast.plusMinutes(2))).replace("%%TIMESERIES_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst)).replace("%%TIMESERIES_ADDED%%", Integer.toString(added)).replace("%%TIMESERIES_NUMEVENTS%%", Integer.toString(num_events));
    // this query will probably be answered from the indexing tasks but possibly from 2 historical segments / 2 indexing
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    LOG.info("Shutting down Kafka Supervisor");
    indexer.shutdownSupervisor(supervisorId);
    // wait for all kafka indexing tasks to finish
    LOG.info("Waiting for all kafka indexing tasks to finish");
    RetryUtil.retryUntilTrue(new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return (indexer.getPendingTasks().size() + indexer.getRunningTasks().size() + indexer.getWaitingTasks().size()) == 0;
        }
    }, "Waiting for Tasks Completion");
    // wait for segments to be handed off
    try {
        RetryUtil.retryUntil(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                return coordinator.areSegmentsLoaded(DATASOURCE);
            }
        }, true, 30000, 10, "Real-time generated segments loaded");
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    LOG.info("segments are present");
    segmentsExist = true;
    // this query will be answered by at least 1 historical segment, most likely 2, and possibly up to all 4
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used: ZkClient(org.I0Itec.zkclient.ZkClient) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) InputStream(java.io.InputStream) IOException(java.io.IOException) Properties(java.util.Properties) TopicExistsException(kafka.common.TopicExistsException) DateTimeZone(org.joda.time.DateTimeZone) DateTime(org.joda.time.DateTime) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ISE(io.druid.java.util.common.ISE) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) DateTimeFormatter(org.joda.time.format.DateTimeFormatter) Test(org.testng.annotations.Test)
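
The test above blocks on each send with Future.get(), so every event is broker-acknowledged before the queries run. A minimal sketch of the same synchronous pattern, with idempotence enabled so the configured retries cannot write duplicates (enable.idempotence needs kafka-clients 0.11+; the broker address and topic here are hypothetical):

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SyncSendSketch {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker
        props.put("acks", "all");
        props.put("retries", 3);
        props.put("enable.idempotence", true); // retried sends cannot produce duplicate records
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            // get() blocks until the broker acknowledges, surfacing any send failure here
            RecordMetadata md = producer.send(new ProducerRecord<>("events", "payload")).get();
            System.out.println("wrote to partition " + md.partition() + " at offset " + md.offset());
        }
    }
}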

Aggregations

KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 23 usages
Properties (java.util.Properties): 14 usages
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 9 usages
ArrayList (java.util.ArrayList): 5 usages
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 5 usages
Test (org.junit.Test): 4 usages
HashMap (java.util.HashMap): 3 usages
Map (java.util.Map): 3 usages
Random (java.util.Random): 3 usages
ISE (io.druid.java.util.common.ISE): 2 usages
IOException (java.io.IOException): 2 usages
InputStream (java.io.InputStream): 2 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 usages
TopicExistsException (kafka.common.TopicExistsException): 2 usages
ZkClient (org.I0Itec.zkclient.ZkClient): 2 usages
Callback (org.apache.kafka.clients.producer.Callback): 2 usages
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 2 usages
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 2 usages
DateTime (org.joda.time.DateTime): 2 usages
DateTimeZone (org.joda.time.DateTimeZone): 2 usages